bitkeeper revision 1.1159.187.43 (41acb922MGgCQ71HoZ-M1-vpezLXKQ)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 30 Nov 2004 18:17:06 +0000 (18:17 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 30 Nov 2004 18:17:06 +0000 (18:17 +0000)
More simplifications to p.t. management.

linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c
linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c
linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h

index a59d760eb724fa10252986f7cfe1b450a5190908..a29a7f4e97250fa6dbd80171a865e553fc67750d 100644 (file)
@@ -513,7 +513,7 @@ void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
             va < gdt_descr->address + gdt_descr->size;
             va += PAGE_SIZE, f++) {
                frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
-               protect_page(swapper_pg_dir, (void *)va, PROT_ON);
+               make_page_readonly((void *)va);
        }
        flush_page_update_queue();
        if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
index e2408ea3f717e2e15f3941440a4b9beedb7f8a53..3a0a0eb674860a5118fc2d6ab1e26f8139fc897f 100644 (file)
@@ -77,12 +77,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-               /* XEN: Make the new p.t. read-only. */
-               pgd_t *kpgd = pgd_offset_k((unsigned long)page_table);
-               pmd_t *kpmd = pmd_offset(kpgd, (unsigned long)page_table);
-               pte_t *kpte = pte_offset_kernel(kpmd, (unsigned long)page_table);
-               xen_l1_entry_update(
-                       kpte, (*(unsigned long *)kpte)&~_PAGE_RW);
+               make_page_readonly(page_table);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();  
@@ -131,22 +126,6 @@ static void __init page_table_range_init (unsigned long start, unsigned long end
        }
 }
 
-void __init protect_page(pgd_t *pgd, void *page, int mode)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       unsigned long addr;
-
-       addr = (unsigned long)page;
-       pgd += pgd_index(addr);
-       pmd = pmd_offset(pgd, addr);
-       pte = pte_offset_kernel(pmd, addr);
-       if (!pte_present(*pte))
-               return;
-       queue_l1_entry_update(pte, mode ? pte_val_ma(*pte) & ~_PAGE_RW :
-                                       pte_val_ma(*pte) | _PAGE_RW);
-}
-
 static inline int is_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
@@ -370,12 +349,12 @@ static void __init pagetable_init (void)
         * it. We clean up by write-enabling and then freeing the old page dir.
         */
        memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
-       protect_page(new_pgd, new_pgd, PROT_ON);
+       make_page_readonly(new_pgd);
        queue_pgd_pin(__pa(new_pgd));
        load_cr3(new_pgd);
        queue_pgd_unpin(__pa(old_pgd));
        __flush_tlb_all(); /* implicit flush */
-       protect_page(new_pgd, old_pgd, PROT_OFF);
+       make_page_writable(old_pgd);
        flush_page_update_queue();
        free_bootmem(__pa(old_pgd), PAGE_SIZE);
 
index 85f0f1162cbb3e234260ba051bee725a015b8b49..c82469bc0071ed115c07f1d62b77baa77d6d03ee 100644 (file)
@@ -54,12 +54,6 @@ void xen_cpu_idle (void);
 /* arch/xen/i386/kernel/hypervisor.c */
 void do_hypervisor_callback(struct pt_regs *regs);
 
-/* arch/xen/i386/mm/init.c */
-/* NOTE: caller must call flush_page_update_queue() */
-#define PROT_ON  1
-#define PROT_OFF 0
-void /* __init */ protect_page(pgd_t *dpgd, void *page, int mode);
-
 /* arch/xen/i386/kernel/head.S */
 void lgdt_finish(void);